#define L2_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_USER|_PAGE_ACCESSED)
#define L1_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_USER|_PAGE_ACCESSED|_PAGE_DIRTY)
-extern int new_do_process_page_updates(page_update_request_t *, int);
+extern int do_process_page_updates_bh(page_update_request_t *, int);
extern int nr_mods;
extern module_t *mod;
return 0;
}
+
+/* final_setup_guestos is used for final setup and launching of domains other
+ * than domain 0. ie. the domains that are being built by the userspace dom0
+ * domain builder.
+ *
+ * Initial load map:
+ * start_address:
+ * OS image
+ * ....
+ * stack_start:
+ * start_info:
+ * <one page>
+ * page tables:
+ * <enough pages>
+ * end_address:
+ * shared_info:
+ * <one page>
+ */
int final_setup_guestos(struct task_struct * p, dom_meminfo_t * meminfo)
{
unsigned long long time;
unsigned long phys_l1tab, phys_l2tab;
page_update_request_t * pgt_updates;
+ unsigned long curr_update_phys;
unsigned long count;
net_ring_t *net_ring;
net_vif_t *net_vif;
/* first of all, set up domain pagetables */
pgt_updates = (page_update_request_t *)
map_domain_mem(meminfo->pgt_update_arr);
- printk(KERN_ALERT "bd240 debug: update request starting virt %lx, phys %lx\n", pgt_updates, meminfo->pgt_update_arr);
+ curr_update_phys = meminfo->pgt_update_arr;
for(count = 0; count < meminfo->num_pgt_updates; count++){
-        printk(KERN_ALERT "bd240 debug: update pair %lx, %lx\n", pgt_updates->ptr, pgt_updates->val);
-        new_do_process_page_updates(pgt_updates, 1);
+        do_process_page_updates_bh(pgt_updates, 1);
pgt_updates++;
if(!((unsigned long)pgt_updates & (PAGE_SIZE-1))){
+            /* Crossed a page boundary: release the exhausted mapping and
+             * map the next page of updates, chained via frame_table. */
+            unmap_domain_mem((void *)((unsigned long)(pgt_updates-1) & PAGE_MASK));
+            curr_update_phys = (frame_table + (curr_update_phys >> PAGE_SHIFT))->next
+                << PAGE_SHIFT;
+            pgt_updates = (page_update_request_t *)map_domain_mem(curr_update_phys);
}
}
+    /* The loop always leaves one update page mapped (either the initial
+     * mapping or the last one chained in above); release it so the
+     * map_domain_mem slots are not leaked. */
+    unmap_domain_mem((void *)((unsigned long)pgt_updates & PAGE_MASK));
l2tab[PERDOMAIN_VIRT_START >> L2_PAGETABLE_SHIFT] =
mk_l2_pgentry(__pa(p->mm.perdomain_pt) | PAGE_HYPERVISOR);
p->mm.pagetable = mk_pagetable(phys_l2tab);
+ unmap_domain_mem(l2tab);
/* map in the shared info structure */
- phys_l2tab = pagetable_val(p->mm.pagetable) +
- (l2_table_offset(meminfo->virt_shinfo_addr) * sizeof(l2_pgentry_t));
+ phys_l2tab = pagetable_val(p->mm.pagetable);
l2tab = map_domain_mem(phys_l2tab);
+ l2tab += l2_table_offset(meminfo->virt_shinfo_addr);
phys_l1tab = l2_pgentry_to_phys(*l2tab) +
(l1_table_offset(meminfo->virt_shinfo_addr) * sizeof(l1_pgentry_t));
l1tab = map_domain_mem(phys_l1tab);
*l1tab = mk_l1_pgentry(__pa(p->shared_info) | L1_PROT);
+ unmap_domain_mem(l2tab);
+ unmap_domain_mem(l1tab);
/* set up the shared info structure */
rdtscll(time);
return 0;
}
-
-/*
- * Initial load map:
- * start_address:
- * OS image
- * ....
- * stack_start:
- * start_info:
- * <one page>
- * page tables:
- * <enough pages>
- * end_address:
- * shared_info:
- * <one page>
+
+static unsigned long alloc_page_from_domain(unsigned long * cur_addr,
+ unsigned long * index)
+{
+ *cur_addr = (frame_table + (*cur_addr >> PAGE_SHIFT))->prev << PAGE_SHIFT;
+ (*index)--;
+ return *cur_addr;
+}
+
+/* setup_guestos is used for building dom0 solely. other domains are built in
+ * userspace dom0 and final setup is being done by final_setup_guestos.
*/
-#define MB_PER_DOMAIN 16
int setup_guestos(struct task_struct *p, dom0_newdomain_t *params)
{
-#define L2_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_USER|_PAGE_ACCESSED)
-#define L1_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_USER|_PAGE_ACCESSED|_PAGE_DIRTY)
-#define ALLOC_FRAME_FROM_DOMAIN() (alloc_address -= PAGE_SIZE)
char *src, *dst;
int i, dom = p->domain;
- unsigned long start_address, phys_l1tab, phys_l2tab;
- unsigned long cur_address, end_address, alloc_address, vaddr;
+ unsigned long phys_l1tab, phys_l2tab;
+ unsigned long cur_address, alloc_address;
unsigned long virt_load_address, virt_stack_address, virt_shinfo_address;
- unsigned long virt_ftable_start_addr = 0, virt_ftable_end_addr;
- unsigned long ft_mapping = (unsigned long)frame_table;
- unsigned int ft_size = 0;
start_info_t *virt_startinfo_address;
unsigned long long time;
+ unsigned long count;
+ unsigned long alloc_index;
l2_pgentry_t *l2tab, *l2start;
l1_pgentry_t *l1tab = NULL;
struct pfn_info *page = NULL;
}
if ( alloc_new_dom_mem(p, params->memory_kb) ) return -ENOMEM;
-
- /* temporary, *_address have to be reimplemented in another way
- * as we can no longer expect contiguous addr space
- */
- start_address = p->pg_head << PAGE_SHIFT;
- alloc_address = end_address = start_address + (p->tot_pages << PAGE_SHIFT);
-
- /* start_address += (dom * MB_PER_DOMAIN) << 20; */ /* MB -> bytes */
- /* alloc_address = end_address = start_address + (MB_PER_DOMAIN << 20); */
+ alloc_address = p->pg_head << PAGE_SHIFT;
+ alloc_index = p->tot_pages;
if ( (mod[nr_mods-1].mod_end-mod[0].mod_start) >
- ((end_address-start_address)>>1) )
+ (params->memory_kb << 9) )
{
printk("DOM%d: Guest OS image is too large\n"
- " (%luMB is greater than %luMB limit for a\n"
- " %luMB address space)\n",
+ " (%luMB is greater than %uMB limit for a\n"
+ " %uMB address space)\n",
dom, (mod[nr_mods-1].mod_end-mod[0].mod_start)>>20,
- (end_address-start_address)>>21,
- (end_address-start_address)>>20);
+ (params->memory_kb)>>11,
+ (params->memory_kb)>>10);
/* XXX should free domain memory here XXX */
return -1;
}
- /* Set up initial mappings. */
- printk("DOM%d: Mapping physmem %08lx -> %08lx (%luMB)\n", dom,
- start_address, end_address, (end_address-start_address)>>20);
printk("DOM%d: Guest OS virtual load address is %08lx\n", dom,
virt_load_address);
* WARNING: The new domain must have its 'processor' field
* filled in by now !!
*/
- phys_l2tab = ALLOC_FRAME_FROM_DOMAIN();
+ phys_l2tab = alloc_page_from_domain(&alloc_address, &alloc_index);
l2start = l2tab = map_domain_mem(phys_l2tab);
memcpy(l2tab, idle_pg_table[p->processor], PAGE_SIZE);
l2tab[PERDOMAIN_VIRT_START >> L2_PAGETABLE_SHIFT] =
* make sure a pte exists when we want to map the shared_info struct.
*/
- /* bd240: not only one extra page but one + num of pages required for
- * frame_table if domain 0 is in question. this ugly for loop
- * condition is going to change once domain building is moved out
- * of hypervisor.
- */
-
- if(dom == 0)
- ft_size = frame_table_size;
-
l2tab += l2_table_offset(virt_load_address);
- for ( cur_address = start_address;
- cur_address != (end_address + PAGE_SIZE + ft_size);
- cur_address += PAGE_SIZE )
+ cur_address = p->pg_head << PAGE_SHIFT;
+ for ( count = 0;
+ count < p->tot_pages + 1;
+ count++)
{
if ( !((unsigned long)l1tab & (PAGE_SIZE-1)) )
{
if ( l1tab != NULL ) unmap_domain_mem(l1tab-1);
- phys_l1tab = ALLOC_FRAME_FROM_DOMAIN();
+ phys_l1tab = alloc_page_from_domain(&alloc_address, &alloc_index);
*l2tab++ = mk_l2_pgentry(phys_l1tab|L2_PROT);
l1tab = map_domain_mem(phys_l1tab);
clear_page(l1tab);
l1tab += l1_table_offset(
- virt_load_address + cur_address - start_address);
+ virt_load_address + (count << PAGE_SHIFT));
}
- *l1tab++ = mk_l1_pgentry(cur_address|L1_PROT);
- /* New domain doesn't own shared_info page, or frame_table. */
- if ( cur_address < end_address )
+ if( count < alloc_index )
{
+ *l1tab++ = mk_l1_pgentry(cur_address|L1_PROT);
page = frame_table + (cur_address >> PAGE_SHIFT);
page->flags = dom | PGT_writeable_page;
page->type_count = page->tot_count = 1;
- }
- }
- unmap_domain_mem(l1tab-1);
-
- /* Pages that are part of page tables must be read-only. */
- vaddr = virt_load_address + alloc_address - start_address;
- l2tab = l2start + l2_table_offset(vaddr);
- l1tab = map_domain_mem(l2_pgentry_to_phys(*l2tab));
- l1tab += l1_table_offset(vaddr);
- l2tab++;
- for ( cur_address = alloc_address;
- cur_address != end_address;
- cur_address += PAGE_SIZE )
- {
- if ( !((unsigned long)l1tab & (PAGE_SIZE-1)) )
+ }
+ else
{
- unmap_domain_mem(l1tab-1);
- l1tab = map_domain_mem(l2_pgentry_to_phys(*l2tab));
- l2tab++;
+ *l1tab++ = mk_l1_pgentry((cur_address|L1_PROT) & ~_PAGE_RW);
+ page = frame_table + (cur_address >> PAGE_SHIFT);
+ page->flags = dom | PGT_l1_page_table;
+ page->type_count = 1;
+ page->tot_count = 2;
}
- *l1tab++ = mk_l1_pgentry(l1_pgentry_val(*l1tab) & ~_PAGE_RW);
- page = frame_table + (cur_address >> PAGE_SHIFT);
- page->flags = dom | PGT_l1_page_table;
- page->tot_count++;
+
+ cur_address = ((frame_table + (cur_address >> PAGE_SHIFT))->next) << PAGE_SHIFT;
}
unmap_domain_mem(l1tab-1);
+ page = frame_table + (frame_table + p->pg_head)->prev;
page->flags = dom | PGT_l2_page_table;
/* Map in the the shared info structure. */
- virt_shinfo_address = end_address - start_address + virt_load_address;
+ virt_shinfo_address = virt_load_address + (p->tot_pages << PAGE_SHIFT);
l2tab = l2start + l2_table_offset(virt_shinfo_address);
l1tab = map_domain_mem(l2_pgentry_to_phys(*l2tab));
l1tab += l1_table_offset(virt_shinfo_address);
p->shared_info->domain_time = time;
p->shared_info->ticks_per_ms = ticks_per_usec * 1000;
- /* for DOM0, setup mapping of frame table */
- if ( dom == 0 )
- {
- virt_ftable_start_addr = virt_shinfo_address + PAGE_SIZE;
- virt_ftable_end_addr = virt_ftable_start_addr + frame_table_size;
- for(cur_address = virt_ftable_start_addr;
- cur_address < virt_ftable_end_addr;
- cur_address += PAGE_SIZE)
- {
- l2tab = l2start + l2_table_offset(cur_address);
- l1tab = map_domain_mem(l2_pgentry_to_phys(*l2tab));
- l1tab += l1_table_offset(cur_address);
- *l1tab = mk_l1_pgentry(__pa(ft_mapping)|L1_PROT);
- unmap_domain_mem(l1tab);
- ft_mapping += PAGE_SIZE;
- }
- }
-
virt_startinfo_address = (start_info_t *)
- (alloc_address - start_address - PAGE_SIZE + virt_load_address);
+ (virt_load_address + ((alloc_index - 1) << PAGE_SHIFT));
virt_stack_address = (unsigned long)virt_startinfo_address;
unmap_domain_mem(l2start);
/* Set up start info area. */
memset(virt_startinfo_address, 0, sizeof(*virt_startinfo_address));
- virt_startinfo_address->nr_pages = (end_address-start_address)>>PAGE_SHIFT;
+ virt_startinfo_address->nr_pages = p->tot_pages;
virt_startinfo_address->shared_info =
(shared_info_t *)virt_shinfo_address;
- virt_startinfo_address->pt_base =
- end_address - PAGE_SIZE - start_address + virt_load_address;
- virt_startinfo_address->phys_base = start_address;
- /* NB. Next field will be NULL if dom != 0. */
- virt_startinfo_address->frame_table = virt_ftable_start_addr;
- virt_startinfo_address->frame_table_len = ft_size;
- virt_startinfo_address->frame_table_pa = __pa(frame_table);
+ virt_startinfo_address->pt_base = virt_load_address +
+ ((p->tot_pages - 1) << PAGE_SHIFT);
+ virt_startinfo_address->phys_base = p->pg_head << PAGE_SHIFT;
/* Add virtual network interfaces and point to them in startinfo. */
while (params->num_vifs-- > 0) {
if ( (l1_pgentry_val(new_l1_entry) &
(_PAGE_GLOBAL|_PAGE_PAT)) )
{
MEM_LOG("Bad L1 entry val %04lx",
l1_pgentry_val(new_l1_entry) &
(_PAGE_GLOBAL|_PAGE_PAT));
}
if ( get_page(l1_pgentry_to_pagenr(new_l1_entry),
l1_pgentry_val(new_l1_entry) & _PAGE_RW) )
goto fail;
}
else if ( (l1_pgentry_val(old_l1_entry) & _PAGE_PRESENT) )
return err;
}
-/* Apply updates to page table @pagetable_id within the current domain. */
-int do_process_page_updates(page_update_request_t *updates, int count)
+/* functions to handle page table updates: upper half is invoked in case pt updates
+ * are requested by a domain and it invokes copy_from_user. bottom half is invoked
+ * both in case of domain downcall and domain building by hypervisor.
+ */
+page_update_request_t * do_process_page_updates_uh(page_update_request_t *updates,
+ int count)
{
- page_update_request_t cur;
- unsigned long flags, pfn;
- struct pfn_info *page;
- int err = 0, i;
-
- for ( i = 0; i < count; i++ )
- {
- if ( copy_from_user(&cur, updates, sizeof(cur)) )
- {
- kill_domain_with_errmsg("Cannot read page update request");
- }
-
- pfn = cur.ptr >> PAGE_SHIFT;
- if ( pfn >= max_page )
- {
- MEM_LOG("Page out of range (%08lx > %08lx)", pfn, max_page);
- kill_domain_with_errmsg("Page update request out of range");
- }
-
- err = 1;
-
- /* Least significant bits of 'ptr' demux the operation type. */
- switch ( cur.ptr & (sizeof(l1_pgentry_t)-1) )
- {
-
- /*
- * PGREQ_NORMAL: Normal update to any level of page table.
- */
- case PGREQ_NORMAL:
- page = frame_table + pfn;
- flags = page->flags;
- if ( DOMAIN_OKAY(flags) )
- {
- switch ( (flags & PG_type_mask) )
- {
- case PGT_l1_page_table:
- err = mod_l1_entry(cur.ptr, mk_l1_pgentry(cur.val));
- break;
- case PGT_l2_page_table:
- err = mod_l2_entry(cur.ptr, mk_l2_pgentry(cur.val));
- break;
- default:
- MEM_LOG("Update to non-pt page %08lx", cur.ptr);
- break;
- }
- }
- break;
-
- /*
- * PGREQ_UNCHECKED_UPDATE: Make an unchecked update to a
- * bottom-level page-table entry.
- * Restrictions apply:
- * 1. Update only allowed by domain 0.
- * 2. Update must be to a level-1 pte belonging to dom0.
- */
- case PGREQ_UNCHECKED_UPDATE:
- cur.ptr &= ~(sizeof(l1_pgentry_t) - 1);
- page = frame_table + pfn;
- flags = page->flags;
- if ( (flags | current->domain) == PGT_l1_page_table )
- {
- unsigned long *va = map_domain_mem(cur.ptr);
- *va = cur.val;
- unmap_domain_mem(va);
- err = 0;
- }
- else
- {
- MEM_LOG("UNCHECKED_UPDATE: Bad domain %d, or"
- " bad pte type %08lx", current->domain, flags);
- }
- break;
-
- /*
- * PGREQ_EXTENDED_COMMAND: Extended command is specified
- * in the least-siginificant bits of the 'value' field.
- */
- case PGREQ_EXTENDED_COMMAND:
- cur.ptr &= ~(sizeof(l1_pgentry_t) - 1);
- err = do_extended_command(cur.ptr, cur.val);
- break;
-
- default:
- MEM_LOG("Invalid page update command %08lx", cur.ptr);
- break;
- }
-
- if ( err )
- {
- page = frame_table + (cur.ptr >> PAGE_SHIFT);
- printk(KERN_ALERT "bd240 debug: Update request %d\n", cur.ptr & (sizeof(l1_pgentry_t) - 1));
- printk(KERN_ALERT "bd240 debug: Update request %lx, %lx\n", cur.ptr, cur.val);
- printk(KERN_ALERT "bd240 debug: Page flags %lx\n", page->flags);
-
- kill_domain_with_errmsg("Illegal page update request");
- }
-
- updates++;
- }
+    page_update_request_t * ret = kmalloc(sizeof(page_update_request_t) * count,
+                                          GFP_KERNEL);
-    if ( tlb_flush[smp_processor_id()] )
+
+    /* kmalloc can fail: handing a NULL buffer to copy_from_user would
+     * fault inside the hypervisor, so treat allocation failure the same
+     * way as an unreadable request. */
+    if ( ret == NULL )
+    {
+        kill_domain_with_errmsg("Cannot allocate page update buffer");
+    }
+
+    if ( copy_from_user(ret, updates, sizeof(page_update_request_t) * count) )
{
-        tlb_flush[smp_processor_id()] = 0;
-        __asm__ __volatile__ (
-            "movl %%eax,%%cr3" : :
-            "a" (pagetable_val(current->mm.pagetable)));
+        kill_domain_with_errmsg("Cannot read page update request");
}
-
-    return(0);
+
+    return ret;
}
/* Apply updates to page table @pagetable_id within the current domain. */
-int new_do_process_page_updates(page_update_request_t * cur, int count)
+int do_process_page_updates_bh(page_update_request_t * cur, int count)
{
unsigned long flags, pfn;
struct pfn_info *page;
page = frame_table + pfn;
flags = page->flags;
- printk(KERN_ALERT "bd240 debug: normal update\n");
-
- if ( (flags & PG_domain_mask) == current->domain )
+ if ( DOMAIN_OKAY(flags) )
{
- printk(KERN_ALERT "bd240 debug: normal update inside\n");
switch ( (flags & PG_type_mask) )
{
case PGT_l1_page_table:
}
}
- printk(KERN_ALERT "bd240 debug: normal update finish\n");
-
break;
/*
if ( err )
{
page = frame_table + (cur->ptr >> PAGE_SHIFT);
- printk(KERN_ALERT "bd240 debug: Update request %lx\n", cur->ptr & (sizeof(l1_pgentry_t) - 1));
- printk(KERN_ALERT "bd240 debug: Update request %lx, %lx\n", cur->ptr, cur->val);
- printk(KERN_ALERT "bd240 debug: Page flags %lx\n", page->flags);
-
kill_domain_with_errmsg("Illegal page update request");
}
return(0);
}
+
+/* Apply updates to page table @pagetable_id within the current domain.
+ * The upper half copies the request array out of the caller's address
+ * space into a kmalloc'd kernel buffer; the bottom half applies it.
+ * The buffer must be freed here, otherwise every hypercall leaks
+ * sizeof(page_update_request_t) * count bytes of hypervisor heap.
+ */
+int do_process_page_updates(page_update_request_t *updates, int count)
+{
+    page_update_request_t * pg_updates;
+    int ret;
+
+    pg_updates = do_process_page_updates_uh(updates, count);
+    ret = do_process_page_updates_bh(pg_updates, count);
+    kfree(pg_updates);
+
+    return ret;
+}
#define XENO_BASE "xeno" // proc file name defs should be in separate .h
#define DOM0_CMD_INTF "dom0_cmd"
-#define DOM0_FT "frame_table"
#define DOM0_NEWDOM "new_dom_data"
#define MAX_LEN 16
#define DOM_DIR "dom"
-#define DOM_TS "task_data"
#define DOM_MEM "mem"
static struct proc_dir_entry *xeno_base;
int direct_unmap(unsigned long, unsigned long);
int direct_disc_unmap(unsigned long, unsigned long, int);
-/* frame_table mapped from dom0 */
-frame_table_t * frame_table;
-unsigned long frame_table_len;
-unsigned long frame_table_pa;
-
static unsigned char readbuf[1204];
static int cmd_read_proc(char *page, char **start, off_t off,
return strlen(page);
}
-static ssize_t ts_read(struct file * file, char * buff, size_t size, loff_t * off)
-{
- dom0_op_t op;
- unsigned long addr;
- pgprot_t prot;
- int ret = 0;
-
- /* retrieve domain specific data from proc_dir_entry */
- dom_procdata_t * dom_data = (dom_procdata_t *)((struct proc_dir_entry *)file->f_dentry->d_inode->u.generic_ip)->data;
-
- /*
- * get the phys addr of the task struct for the requested
- * domain
- */
- op.cmd = DOM0_MAPTASK;
- op.u.mapdomts.domain = dom_data->domain;
- op.u.mapdomts.ts_phy_addr = -1;
-
- ret = HYPERVISOR_dom0_op(&op);
- if(ret != 0)
- return -EAGAIN;
-
- prot = PAGE_SHARED;
-
- /* remap the range using xen specific routines */
- addr = direct_mmap(op.u.mapdomts.ts_phy_addr, PAGE_SIZE, prot, 0, 0);
- copy_to_user((unsigned long *)buff, &addr, sizeof(addr));
- dom_data->map_size = PAGE_SIZE;
-
- return sizeof(addr);
-
-}
-
-static ssize_t ts_write(struct file * file, const char * buff, size_t size , loff_t * off)
-{
- unsigned long addr;
- dom_procdata_t * dom_data = (dom_procdata_t *)((struct proc_dir_entry *)file->f_dentry->d_inode->u.generic_ip)->data;
-
- copy_from_user(&addr, (unsigned long *)buff, sizeof(addr));
-
- if(direct_unmap(addr, dom_data->map_size) == 0){
- return sizeof(addr);
- } else {
- return -1;
- }
-}
-
-struct file_operations ts_ops = {
- read: ts_read,
- write: ts_write,
-};
-
static void create_proc_dom_entries(int dom)
{
struct proc_dir_entry * dir;
dir = proc_mkdir(dir_name, xeno_base);
dir->data = dom_data;
-
- file = create_proc_entry(DOM_TS, 0600, dir);
- if(file != NULL)
- {
- file->owner = THIS_MODULE;
- file->nlink = 1;
- file->proc_fops = &ts_ops;
-
- file->data = dom_data;
- }
}
static ssize_t dom_mem_write(struct file * file, const char * buff,
}
-static ssize_t ft_write(struct file * file, const char * buff, size_t size , loff_t * off)
-{
- unsigned long addr;
-
- copy_from_user(&addr, (unsigned long *)buff, sizeof(addr));
-
- if(direct_unmap(addr, frame_table_len) == 0){
- return sizeof(addr);
- } else {
- return -1;
- }
-}
-
-static ssize_t ft_read(struct file * file, char * buff, size_t size, loff_t * off)
-{
- unsigned long addr;
- pgprot_t prot;
-
- prot = PAGE_SHARED;
-
- /* remap the range using xen specific routines */
- addr = direct_mmap(frame_table_pa, frame_table_len, prot, 0, 0);
- copy_to_user((unsigned long *)buff, &addr, sizeof(addr));
-
- return sizeof(addr);
-
-}
-
-struct file_operations ft_ops = {
- read: ft_read,
- write: ft_write,
-};
-
static int __init init_module(void)
{
- frame_table = (frame_table_t *)start_info.frame_table;
- frame_table_len = start_info.frame_table_len;
- frame_table_pa = start_info.frame_table_pa;
-
/* xeno proc root setup */
xeno_base = proc_mkdir(XENO_BASE, &proc_root);
dom0_cmd_intf->write_proc = cmd_write_proc;
}
- /* frame table mapping, to be mmaped */
- proc_ft = create_proc_entry(DOM0_FT, 0600, xeno_base);
- if(proc_ft != NULL)
- {
- proc_ft->owner = THIS_MODULE;
- proc_ft->nlink = 1;
- proc_ft->proc_fops = &ft_ops;
- }
-
/* set up /proc entries for dom 0 */
create_proc_dom_entries(0);